}
h_inservice = highest_inservice_irq(vcpu);
- vpsr.val = vmx_vcpu_get_psr(vcpu);
+ vpsr.val = VCPU(vcpu, vpsr);
mask = irq_masked(vcpu, h_pending, h_inservice);
if ( vpsr.i && IRQ_NO_MASKED == mask ) {
isr = vpsr.val & IA64_PSR_RI;
IA64_PSR vpsr;
uint64_t isr;
REGS *regs=vcpu_regs(vcpu);
- vpsr.val = vmx_vcpu_get_psr(vcpu);
+ vpsr.val = VCPU(vcpu, vpsr);
update_vhpi(vcpu, NULL_VECTOR);
isr = vpsr.val & IA64_PSR_RI;
if ( !vpsr.ic )
tpr_t vtpr;
IA64_PSR vpsr;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
+ vpsr.val = VCPU(vcpu, vpsr);
vtpr.val = VCPU(vcpu, tpr);
threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
PTA vpta;
IA64_PSR vpsr;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
+ vpsr.val = VCPU(vcpu, vpsr);
vcpu_get_rr(vcpu, vadr, &vrr.rrval);
vmx_vcpu_get_pta(vcpu,&vpta.val);
visr.val=0;
visr.ei=pt_isr.ei;
visr.ir=pt_isr.ir;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
+ vpsr.val = VCPU(vcpu, vpsr);
if(vpsr.ic==0){
visr.ni=1;
}
{
IA64_PSR vpsr;
u64 value;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
+ vpsr.val = VCPU(vcpu, vpsr);
/* Vol2, Table 8-1 */
if ( vpsr.ic ) {
if ( set_ifa){
;;
-#define PAL_VSA_SYNC_READ_CLEANUP_PSR_PL \
- /* begin to call pal vps sync_read and cleanup psr.pl */ \
+#define PAL_VSA_SYNC_READ \
+ /* begin to call pal vps sync_read */ \
add r25=IA64_VPD_BASE_OFFSET, r21; \
movl r20=__vsa_base; \
;; \
add r20=PAL_VPS_SYNC_READ,r20; \
;; \
{ .mii; \
- add r22=VPD(VPSR),r25; \
+ nop 0x0; \
mov r24=ip; \
mov b0=r20; \
;; \
}; \
{ .mmb; \
add r24 = 0x20, r24; \
- mov r16 = cr.ipsr; /* Temp workaround since psr.ic is off */ \
+ nop 0x0; \
br.cond.sptk b0; /* call the service */ \
;; \
}; \
- ld8 r17=[r22]; \
- /* deposite ipsr bit cpl into vpd.vpsr, since epc will change */ \
- extr.u r30=r16, IA64_PSR_CPL0_BIT, 2; \
- ;; \
- dep r17=r30, r17, IA64_PSR_CPL0_BIT, 2; \
- extr.u r30=r16, IA64_PSR_BE_BIT, 5; \
- ;; \
- dep r17=r30, r17, IA64_PSR_BE_BIT, 5; \
- extr.u r30=r16, IA64_PSR_RI_BIT, 2; \
- ;; \
- dep r17=r30, r17, IA64_PSR_RI_BIT, 2; \
- ;; \
- st8 [r22]=r17; \
- ;;
movl r11=FPSR_DEFAULT; /* L-unit */ \
movl r1=__gp; /* establish kernel global pointer */ \
;; \
- PAL_VSA_SYNC_READ_CLEANUP_PSR_PL \
+ PAL_VSA_SYNC_READ \
VMX_MINSTATE_END_SAVE_MIN
/*
physical_tlb_miss(VCPU *vcpu, u64 vadr)
{
u64 pte;
- IA64_PSR vpsr;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
pte = vadr& _PAGE_PPN_MASK;
- pte = pte|(vpsr.cpl<<7)|PHY_PAGE_WB;
+ pte = pte | PHY_PAGE_WB;
thash_purge_and_insert(vcpu, pte, (PAGE_SHIFT<<2), vadr);
return;
}
UINT64 vector,REGS *regs)
{
VCPU *vcpu = current;
- UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
+ UINT64 vpsr = VCPU(vcpu, vpsr);
vector=vec2off[vector];
if(!(vpsr&IA64_PSR_IC)&&(vector!=IA64_DATA_NESTED_TLB_VECTOR)){
panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
IA64_PSR vpsr;
src=®s->r16;
sunat=®s->eml_unat;
- vpsr.val = vmx_vcpu_get_psr(v);
+ vpsr.val = VCPU(v, vpsr);
if(vpsr.bn){
dst = &VCPU(v, vgr[0]);
dunat =&VCPU(v, vnat);
check_vtlb_sanity(vtlb);
dump_vtlb(vtlb);
#endif
- vpsr.val = vmx_vcpu_get_psr(v);
+ vpsr.val = VCPU(v, vpsr);
misr.val=VMX(v,cr_isr);
if(is_physical_mode(v)&&(!(vadr<<1>>62))){
visr.val = 0;
- vpsr.val = vmx_vcpu_get_psr (vcpu);
+ vpsr.val = VCPU(vcpu, vpsr);
if (!vpsr.ic == 1 ) {
/* Set ISR.ni */
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
//unsigned long last_guest_rsm = 0x0;
+
+#ifdef VTI_DEBUG
struct guest_psr_bundle{
unsigned long ip;
unsigned long psr;
struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
+#endif
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
UINT64 mask;
REGS *regs;
IA64_PSR old_psr, new_psr;
- old_psr.val=vmx_vcpu_get_psr(vcpu);
+ old_psr.val=VCPU(vcpu, vpsr);
regs=vcpu_regs(vcpu);
/* We only support guest as:
// vpsr.i 0->1
vcpu->arch.irq_new_condition = 1;
}
- new_psr.val=vmx_vcpu_get_psr(vcpu);
+ new_psr.val=VCPU(vcpu, vpsr);
+#ifdef VTI_DEBUG
{
struct pt_regs *regs = vcpu_regs(vcpu);
guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
if (++guest_psr_index >= 100)
guest_psr_index = 0;
}
+#endif
#if 0
if (old_psr.i != new_psr.i) {
if (old_psr.i)
{
// TODO: trap_bounce?? Eddie
REGS *regs = vcpu_regs(vcpu);
- IA64_PSR vpsr;
IA64_PSR *ipsr = (IA64_PSR *)®s->cr_ipsr;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
- if (vpsr.ri == 2) {
- vpsr.ri = 0;
- regs->cr_iip += 16;
+ if (ipsr->ri == 2) {
+ ipsr->ri = 0;
+ regs->cr_iip += 16;
} else {
- vpsr.ri++;
+ ipsr->ri++;
}
- ipsr->ri = vpsr.ri;
- vpsr.val &=
- (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
- IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
- ));
-
- VCPU(vcpu, vpsr) = vpsr.val;
-
ipsr->val &=
(~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
{
REGS *regs = vcpu_regs(vcpu);
IA64_PSR vpsr;
- vpsr.val = vmx_vcpu_get_psr(vcpu);
+ vpsr.val = VCPU(vcpu, vpsr);
if(!vpsr.ic)
VCPU(vcpu,ifs) = regs->cr_ifs;
}
-UINT64
-vmx_vcpu_get_psr(VCPU *vcpu)
-{
- return VCPU(vcpu,vpsr);
-}
-
#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, UINT64 *val)
#endif
+/*
+ * VPSR cannot track certain bits of the guest PSR (BE, UP, AC, MFL,
+ * MFH, CPL, RI); while the guest runs those live in cr.ipsr instead.
+ * This function reconstructs the complete guest PSR by merging VPSR
+ * with the live cr.ipsr bits.
+ */
+
+UINT64 vmx_vcpu_get_psr(VCPU *vcpu)
+{
+ UINT64 mask;
+ REGS *regs = vcpu_regs(vcpu);
+ mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
+ IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
+ return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
+}
+
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
{
UINT64 vpsr;
IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val)
{
+ val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
vmx_vcpu_set_psr(vcpu, val);
return IA64_NO_FAULT;
}
return vmx_vcpu_set_psr_sm(vcpu,imm24);
}
-unsigned long last_guest_psr = 0x0;
IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
{
UINT64 tgt = inst.M33.r1;
*/
val = vmx_vcpu_get_psr(vcpu);
val = (val & MASK(0, 32)) | (val & MASK(35, 2));
- last_guest_psr = val;
return vcpu_set_gr(vcpu, tgt, val, 0);
}
if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");
- val = (val & MASK(0, 32)) | (VCPU(vcpu, vpsr) & MASK(32, 32));
-#if 0
- if (last_mov_from_psr && (last_guest_psr != (val & MASK(0,32))))
- while(1);
- else
- last_mov_from_psr = 0;
-#endif
- return vmx_vcpu_set_psr_l(vcpu,val);
+ return vmx_vcpu_set_psr_l(vcpu, val);
}
IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
{
u64 r2,r3;
+#ifdef VMAL_NO_FAULT_CHECK
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
privilege_op (vcpu);
return IA64_FAULT;
}
+#endif // VMAL_NO_FAULT_CHECK
if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
ISR isr;
IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
{
u64 r3;
+#ifdef VMAL_NO_FAULT_CHECK
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
-#ifdef VMAL_NO_FAULT_CHECK
ISR isr;
if ( vpsr.cpl != 0) {
/* Inject Privileged Operation fault into guest */
IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
{
UINT64 itir, ifa, pte, slot;
+#ifdef VMAL_NO_FAULT_CHECK
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
if ( vpsr.ic ) {
illegal_op(vcpu);
return IA64_FAULT;
}
-#ifdef VMAL_NO_FAULT_CHECK
ISR isr;
if ( vpsr.cpl != 0) {
/* Inject Privileged Operation fault into guest */
UINT64 itir, ifa, pte, slot;
#ifdef VMAL_NO_FAULT_CHECK
ISR isr;
-#endif
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
if ( vpsr.ic ) {
illegal_op(vcpu);
return IA64_FAULT;
}
-#ifdef VMAL_NO_FAULT_CHECK
if ( vpsr.cpl != 0) {
/* Inject Privileged Operation fault into guest */
set_privileged_operation_isr (vcpu, 0);
IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
{
- IA64_PSR vpsr;
IA64FAULT ret1;
+#ifdef VMAL_NO_FAULT_CHECK
+ IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
if ( vpsr.ic ) {
set_illegal_op_isr(vcpu);
return IA64_FAULT;
}
-#ifdef VMAL_NO_FAULT_CHECK
UINT64 fault;
ISR isr;
if ( vpsr.cpl != 0) {